Our goal is to construct a model for the popular game of Rock, Paper, Scissors. This game revolves around the choices made by players — scissors, rock, or paper. This characteristic enables us to employ image classification techniques to model the game effectively.
Our approach involves capturing the hand gesture of the player and predicting whether it represents rock, paper, or scissors. Subsequently, the computer makes a random selection, and a winner is determined.
As part of our methodology, we utilized the TensorFlow Datasets library, which offers a collection of datasets seamlessly compatible with TensorFlow. For the construction of our model, we used the rock paper scissors dataset readily available within the TensorFlow Datasets library.
It is worth mentioning that we got the idea from the TensorFlow dataset and the following notebook, but we built our own model to implement image classification.
#! pip install tensorflow_datasets
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
import matplotlib.pyplot as plt
import plotly.express as px
import plotly
from PIL import Image
plotly.offline.init_notebook_mode()
/opt/conda/lib/python3.10/site-packages/tensorflow_io/python/ops/__init__.py:98: UserWarning: unable to load libtensorflow_io_plugins.so: unable to open file: libtensorflow_io_plugins.so, from paths: ['/opt/conda/lib/python3.10/site-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so']
caused by: ['/opt/conda/lib/python3.10/site-packages/tensorflow_io/python/ops/libtensorflow_io_plugins.so: undefined symbol: _ZN3tsl6StatusC1EN10tensorflow5error4CodeESt17basic_string_viewIcSt11char_traitsIcEENS_14SourceLocationE']
warnings.warn(f"unable to load libtensorflow_io_plugins.so: {e}")
/opt/conda/lib/python3.10/site-packages/tensorflow_io/python/ops/__init__.py:104: UserWarning: file system plugins are not loaded: unable to open file: libtensorflow_io.so, from paths: ['/opt/conda/lib/python3.10/site-packages/tensorflow_io/python/ops/libtensorflow_io.so']
caused by: ['/opt/conda/lib/python3.10/site-packages/tensorflow_io/python/ops/libtensorflow_io.so: undefined symbol: _ZTVN10tensorflow13GcsFileSystemE']
warnings.warn(f"file system plugins are not loaded: {e}")
# Load the Rock Paper Scissors dataset: the train and test splits as
# (image, label) pairs (as_supervised=True), plus the dataset metadata.
(train_dataset, test_dataset), dataset_info = tfds.load(
    'rock_paper_scissors',
    data_dir='tmp',
    with_info=True,
    as_supervised=True,
    split=[tfds.Split.TRAIN, tfds.Split.TEST],
)
Downloading and preparing dataset 219.53 MiB (download: 219.53 MiB, generated: Unknown size, total: 219.53 MiB) to tmp/rock_paper_scissors/3.0.0...
Dl Completed...: 0 url [00:00, ? url/s]
Dl Size...: 0 MiB [00:00, ? MiB/s]
Generating splits...: 0%| | 0/2 [00:00<?, ? splits/s]
Generating train examples...: 0%| | 0/2520 [00:00<?, ? examples/s]
Shuffling tmp/rock_paper_scissors/3.0.0.incomplete1XC258/rock_paper_scissors-train.tfrecord*...: 0%| …
Generating test examples...: 0%| | 0/372 [00:00<?, ? examples/s]
Shuffling tmp/rock_paper_scissors/3.0.0.incomplete1XC258/rock_paper_scissors-test.tfrecord*...: 0%| …
Dataset rock_paper_scissors downloaded and prepared to tmp/rock_paper_scissors/3.0.0. Subsequent calls will reuse this data.
# Display the dataset metadata (splits, feature spec, citation, ...).
dataset_info
tfds.core.DatasetInfo(
name='rock_paper_scissors',
full_name='rock_paper_scissors/3.0.0',
description="""
Images of hands playing rock, paper, scissor game.
""",
homepage='http://laurencemoroney.com/rock-paper-scissors-dataset',
data_path=PosixGPath('/tmp/tmp5upmxuhxtfds'),
file_format=tfrecord,
download_size=219.53 MiB,
dataset_size=219.23 MiB,
features=FeaturesDict({
'image': Image(shape=(300, 300, 3), dtype=uint8),
'label': ClassLabel(shape=(), dtype=int64, num_classes=3),
}),
supervised_keys=('image', 'label'),
disable_shuffling=False,
splits={
'test': <SplitInfo num_examples=372, num_shards=1>,
'train': <SplitInfo num_examples=2520, num_shards=2>,
},
citation="""@ONLINE {rps,
author = "Laurence Moroney",
title = "Rock, Paper, Scissors Dataset",
month = "feb",
year = "2019",
url = "http://laurencemoroney.com/rock-paper-scissors-dataset"
}""",
)
# Basic dataset statistics read from the dataset metadata:
# 2520 train examples, 372 test examples, 3 classes (per the tfds info above).
NUM_TRAIN_EXAMPLES = dataset_info.splits['train'].num_examples
NUM_TEST_EXAMPLES = dataset_info.splits['test'].num_examples
NUM_CLASSES = dataset_info.features['label'].num_classes
print('Number of TRAIN examples:', NUM_TRAIN_EXAMPLES)
print('Number of TEST examples:', NUM_TEST_EXAMPLES)
print('Number of label classes:', NUM_CLASSES)
Number of TRAIN examples: 2520 Number of TEST examples: 372 Number of label classes: 3
# Original images are (300, 300, 3); train at half resolution to reduce
# memory use and speed up training.
INPUT_IMG_SHAPE_ORIGINAL = dataset_info.features['image'].shape
INPUT_IMG_SIZE_ORIGINAL = INPUT_IMG_SHAPE_ORIGINAL[0]
INPUT_IMG_SIZE_REDUCED = INPUT_IMG_SIZE_ORIGINAL // 2
INPUT_IMG_SHAPE_REDUCED = (
    INPUT_IMG_SIZE_REDUCED,
    INPUT_IMG_SIZE_REDUCED,
    INPUT_IMG_SHAPE_ORIGINAL[2],
)
# Aliases used throughout the rest of the notebook.
INPUT_IMG_SIZE = INPUT_IMG_SIZE_REDUCED
INPUT_IMG_SHAPE = INPUT_IMG_SHAPE_REDUCED
print(INPUT_IMG_SHAPE)
(150, 150, 3)
# tfds helper that maps an integer label to its string class name.
get_label_name = dataset_info.features['label'].int2str
def preview_dataset(dataset):
    """Show the first 12 images of `dataset` in a 3x4 grid, titled by label."""
    plt.figure(figsize=(12, 12))
    for idx, (image, label) in enumerate(dataset.take(12), start=1):
        plt.subplot(3, 4, idx)
        class_name = get_label_name(label.numpy())
        plt.title('Label: %s' % class_name)
        plt.imshow(image.numpy())
def preprocess_image(image, label):
    """Normalize pixel values to [0, 1] and resize to the reduced input size.

    Args:
        image: Image tensor (uint8 pixels from the dataset).
        label: Integer class label, passed through unchanged.

    Returns:
        Tuple of (float32 image resized to INPUT_IMG_SIZE, label).
    """
    scaled = tf.cast(image, tf.float32) / 255.0
    resized = tf.image.resize(scaled, [INPUT_IMG_SIZE, INPUT_IMG_SIZE])
    return resized, label
# Apply preprocessing lazily to every example of both splits.
train_dataset = train_dataset.map(preprocess_image)
test_dataset = test_dataset.map(preprocess_image)

# Inspect a single preprocessed example. next(iter(...)) pulls one element;
# the original list(train_dataset)[0] materialized the whole dataset in
# memory just to read the first item.
(first_image, first_label) = next(iter(train_dataset))
print('Label:', first_label.numpy(), '\n')
print('Image shape:', first_image.numpy().shape, '\n')
print(first_image.numpy())
Label: 2 Image shape: (150, 150, 3) [[[0.995098 0.995098 0.995098 ] [0.995098 0.995098 0.995098 ] [0.995098 0.995098 0.995098 ] ... [0.9852941 0.9852941 0.9852941 ] [0.9843137 0.9843137 0.9843137 ] [0.98039216 0.98039216 0.98039216]] [[0.99607843 0.99607843 0.99607843] [0.995098 0.995098 0.995098 ] [0.995098 0.995098 0.995098 ] ... [0.98333335 0.98333335 0.98333335] [0.9813726 0.9813726 0.9813726 ] [0.98333335 0.98333335 0.98333335]] [[0.99607843 0.99607843 0.99607843] [0.9941176 0.9941176 0.9941176 ] [0.9941176 0.9941176 0.9941176 ] ... [0.9852941 0.9852941 0.9852941 ] [0.9852941 0.9852941 0.9852941 ] [0.9813726 0.9813726 0.9813726 ]] ... [[0.9862745 0.9862745 0.9862745 ] [0.98725486 0.98725486 0.98725486] [0.9882353 0.9882353 0.9882353 ] ... [0.9705882 0.9705882 0.9705882 ] [0.97352946 0.97352946 0.97352946] [0.9754902 0.9754902 0.9754902 ]] [[0.9882353 0.9882353 0.9882353 ] [0.98725486 0.98725486 0.98725486] [0.9862745 0.9862745 0.9862745 ] ... [0.9676471 0.9676471 0.9676471 ] [0.97156864 0.97156864 0.97156864] [0.972549 0.972549 0.972549 ]] [[0.9911765 0.9911765 0.9911765 ] [0.9862745 0.9862745 0.9862745 ] [0.9882353 0.9882353 0.9882353 ] ... [0.97352946 0.97352946 0.97352946] [0.9705882 0.9705882 0.9705882 ] [0.97352946 0.97352946 0.97352946]]]
# Visual sanity check of the preprocessed training images.
preview_dataset(train_dataset)
These include rotating images, flipping images, changing colors, and zooming. Augmentations taken from: https://www.wouterbulten.nl/posts/data-augmentation-using-tensorflow-data-dataset/
def flip(x: tf.Tensor) -> tf.Tensor:
    """Randomly mirror an image.

    Applies an independent random horizontal flip, then a random vertical
    flip (each with 50% probability).

    Args:
        x: Image tensor to flip.

    Returns:
        The (possibly) flipped image.
    """
    return tf.image.random_flip_up_down(tf.image.random_flip_left_right(x))
def color(x: tf.Tensor) -> tf.Tensor:
    """Color augmentation.

    Randomly perturbs hue, saturation, brightness and contrast.

    Args:
        x: Image tensor with values in [0, 1] (normalized in preprocess_image).

    Returns:
        Augmented image with values clipped back to [0, 1].
    """
    x = tf.image.random_hue(x, 0.08)
    x = tf.image.random_saturation(x, 0.6, 1.6)
    x = tf.image.random_brightness(x, 0.09)
    x = tf.image.random_contrast(x, 0.1, 1.3)
    # Brightness/contrast jitter can push pixels below 0 or above 1; clip so
    # the model always sees inputs in the same range as un-augmented data.
    return tf.clip_by_value(x, 0.0, 1.0)
def rotate(x: tf.Tensor) -> tf.Tensor:
    """Rotate an image by a random multiple of 90 degrees.

    Args:
        x: Image tensor.

    Returns:
        The image rotated by 0, 90, 180 or 270 degrees.
    """
    num_quarter_turns = tf.random.uniform(
        shape=[], minval=0, maxval=4, dtype=tf.int32
    )
    return tf.image.rot90(x, num_quarter_turns)
def zoom(x: tf.Tensor) -> tf.Tensor:
    """Zoom augmentation.

    With 50% probability, crops a random central region covering 80-99% of
    the image and resizes it back to the network input size.

    Args:
        x: Image tensor of shape (INPUT_IMG_SIZE, INPUT_IMG_SIZE, channels).

    Returns:
        Augmented image with the same spatial size as the input.
    """
    # Generate 20 crop settings, ranging from a 1% to 20% crop.
    scales = list(np.arange(0.8, 1.0, 0.01))
    boxes = np.zeros((len(scales), 4))
    for i, scale in enumerate(scales):
        x1 = y1 = 0.5 - (0.5 * scale)
        x2 = y2 = 0.5 + (0.5 * scale)
        boxes[i] = [x1, y1, x2, y2]

    def random_crop(img):
        # BUG FIX: crop_size was hard-coded to (32, 32), which silently
        # resized augmented images to 32x32 and broke the model's
        # (INPUT_IMG_SIZE, INPUT_IMG_SIZE, 3) input contract. Resize the
        # crop back to the actual input size instead.
        crops = tf.image.crop_and_resize(
            [img],
            boxes=boxes,
            box_indices=np.zeros(len(scales)),
            crop_size=(INPUT_IMG_SIZE, INPUT_IMG_SIZE),
        )
        # Return one of the candidate crops, chosen uniformly at random.
        return crops[tf.random.uniform(shape=[], minval=0, maxval=len(scales), dtype=tf.int32)]

    choice = tf.random.uniform(shape=[], minval=0., maxval=1., dtype=tf.float32)
    # Only apply cropping 50% of the time.
    return tf.cond(choice < 0.5, lambda: x, lambda: random_crop(x))
def augment_data(image, label):
    """Apply the random augmentation pipeline to one (image, label) pair.

    Runs flip, color and rotation in sequence; the zoom augmentation is
    intentionally disabled. The label is passed through unchanged.
    """
    for transform in (flip, color, rotate):
        image = transform(image)
    return image, label
# Apply random augmentation on the fly to every training example.
train_dataset_augmented = train_dataset.map(augment_data)
# Visual sanity check of the augmented images.
preview_dataset(train_dataset_augmented)
In order to prevent the model from learning from the order or grouping of the images, we will shuffle the training examples.
BATCH_SIZE = 32

# BUG FIX: the original code shuffled the dataset but then called .batch()
# on the *un-shuffled* `train_dataset_augmented`, overwriting and discarding
# the shuffle; it also stored the .prefetch() result under a different,
# never-used variable name (`dataset_train_augmented_shuffled`). As a result
# the dataset actually fed to model.fit() was neither shuffled nor
# prefetched. Chain shuffle -> batch -> prefetch on one pipeline instead.
train_dataset_augmented_shuffled = (
    train_dataset_augmented
    # Shuffle so the model cannot learn from the order/grouping of images.
    .shuffle(buffer_size=1000)
    .batch(batch_size=BATCH_SIZE)
    # Prefetch lets the input pipeline fetch the next batches asynchronously
    # while the model is training on the current one.
    .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
)

# The test set only needs batching — no shuffling or augmentation at eval time.
test_dataset_shuffled = test_dataset.batch(BATCH_SIZE)

print(train_dataset_augmented_shuffled)
print(test_dataset_shuffled)
<_BatchDataset element_spec=(TensorSpec(shape=(None, 150, 150, 3), dtype=tf.float32, name=None), TensorSpec(shape=(None,), dtype=tf.int64, name=None))> <_BatchDataset element_spec=(TensorSpec(shape=(None, 150, 150, 3), dtype=tf.float32, name=None), TensorSpec(shape=(None,), dtype=tf.int64, name=None))>
# Define the CNN model: four Conv2D/MaxPooling2D stages that progressively
# halve the spatial resolution while widening the channels, followed by a
# dropout-regularized dense classifier over the 3 classes.
model = tf.keras.Sequential()
# Stage 1
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=INPUT_IMG_SHAPE))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
# Stage 2
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
# Stage 3
model.add(tf.keras.layers.Conv2D(128, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
# Stage 4
model.add(tf.keras.layers.Conv2D(128, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
# Classifier head: flatten, dropout for regularization, then two dense layers
# ending in a softmax over the 3 classes.
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(512, activation='relu'))
model.add(tf.keras.layers.Dense(3, activation='softmax'))
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 148, 148, 64) 1792
max_pooling2d (MaxPooling2D (None, 74, 74, 64) 0
)
conv2d_1 (Conv2D) (None, 72, 72, 64) 36928
max_pooling2d_1 (MaxPooling (None, 36, 36, 64) 0
2D)
conv2d_2 (Conv2D) (None, 34, 34, 128) 73856
max_pooling2d_2 (MaxPooling (None, 17, 17, 128) 0
2D)
conv2d_3 (Conv2D) (None, 15, 15, 128) 147584
max_pooling2d_3 (MaxPooling (None, 7, 7, 128) 0
2D)
flatten (Flatten) (None, 6272) 0
dropout (Dropout) (None, 6272) 0
dense (Dense) (None, 512) 3211776
dense_1 (Dense) (None, 3) 1539
=================================================================
Total params: 3,473,475
Trainable params: 3,473,475
Non-trainable params: 0
_________________________________________________________________
# Render the model architecture as a diagram.
tf.keras.utils.plot_model(
    model,
    show_shapes=True,
    show_layer_names=True,
)
# we can also add learning rate to optimizer
# adam_optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
rmsprop_optimizer = tf.keras.optimizers.RMSprop(learning_rate=0.001)
# Compile the CNN model. sparse_categorical_crossentropy is used because the
# labels are integer class ids (0/1/2), not one-hot vectors.
model.compile(optimizer= rmsprop_optimizer , loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# Train the CNN model for 40 epochs.
model.fit(train_dataset_augmented_shuffled, epochs=40)
Epoch 1/40 79/79 [==============================] - 15s 64ms/step - loss: 1.1370 - accuracy: 0.3790 Epoch 2/40 79/79 [==============================] - 4s 54ms/step - loss: 0.8969 - accuracy: 0.5909 Epoch 3/40 79/79 [==============================] - 6s 70ms/step - loss: 0.5589 - accuracy: 0.7881 Epoch 4/40 79/79 [==============================] - 4s 54ms/step - loss: 0.3498 - accuracy: 0.8956 Epoch 5/40 79/79 [==============================] - 4s 54ms/step - loss: 0.2546 - accuracy: 0.9242 Epoch 6/40 79/79 [==============================] - 4s 53ms/step - loss: 0.1601 - accuracy: 0.9536 Epoch 7/40 79/79 [==============================] - 4s 55ms/step - loss: 0.1425 - accuracy: 0.9663 Epoch 8/40 79/79 [==============================] - 5s 64ms/step - loss: 0.0912 - accuracy: 0.9746 Epoch 9/40 79/79 [==============================] - 4s 55ms/step - loss: 0.0803 - accuracy: 0.9778 Epoch 10/40 79/79 [==============================] - 4s 53ms/step - loss: 0.0987 - accuracy: 0.9730 Epoch 11/40 79/79 [==============================] - 4s 55ms/step - loss: 0.0528 - accuracy: 0.9877 Epoch 12/40 79/79 [==============================] - 4s 54ms/step - loss: 0.0550 - accuracy: 0.9841 Epoch 13/40 79/79 [==============================] - 4s 53ms/step - loss: 0.0483 - accuracy: 0.9845 Epoch 14/40 79/79 [==============================] - 4s 54ms/step - loss: 0.0452 - accuracy: 0.9913 Epoch 15/40 79/79 [==============================] - 5s 60ms/step - loss: 0.0495 - accuracy: 0.9881 Epoch 16/40 79/79 [==============================] - 4s 56ms/step - loss: 0.0386 - accuracy: 0.9893 Epoch 17/40 79/79 [==============================] - 4s 53ms/step - loss: 0.0573 - accuracy: 0.9905 Epoch 18/40 79/79 [==============================] - 4s 54ms/step - loss: 0.0402 - accuracy: 0.9901 Epoch 19/40 79/79 [==============================] - 4s 53ms/step - loss: 0.0235 - accuracy: 0.9964 Epoch 20/40 79/79 [==============================] - 4s 53ms/step - loss: 0.0513 - accuracy: 0.9901 Epoch 
21/40 79/79 [==============================] - 4s 55ms/step - loss: 0.0610 - accuracy: 0.9893 Epoch 22/40 79/79 [==============================] - 5s 60ms/step - loss: 0.0198 - accuracy: 0.9937 Epoch 23/40 79/79 [==============================] - 4s 55ms/step - loss: 0.0614 - accuracy: 0.9921 Epoch 24/40 79/79 [==============================] - 4s 57ms/step - loss: 0.0365 - accuracy: 0.9901 Epoch 25/40 79/79 [==============================] - 4s 56ms/step - loss: 0.0304 - accuracy: 0.9913 Epoch 26/40 79/79 [==============================] - 4s 53ms/step - loss: 0.0205 - accuracy: 0.9933 Epoch 27/40 79/79 [==============================] - 4s 54ms/step - loss: 0.0243 - accuracy: 0.9940 Epoch 28/40 79/79 [==============================] - 4s 53ms/step - loss: 0.0094 - accuracy: 0.9984 Epoch 29/40 79/79 [==============================] - 5s 62ms/step - loss: 0.0427 - accuracy: 0.9925 Epoch 30/40 79/79 [==============================] - 4s 56ms/step - loss: 0.0130 - accuracy: 0.9937 Epoch 31/40 79/79 [==============================] - 4s 53ms/step - loss: 0.0114 - accuracy: 0.9968 Epoch 32/40 79/79 [==============================] - 4s 53ms/step - loss: 0.0239 - accuracy: 0.9952 Epoch 33/40 79/79 [==============================] - 4s 54ms/step - loss: 0.0160 - accuracy: 0.9956 Epoch 34/40 79/79 [==============================] - 4s 55ms/step - loss: 0.0212 - accuracy: 0.9984 Epoch 35/40 79/79 [==============================] - 4s 54ms/step - loss: 0.0093 - accuracy: 0.9976 Epoch 36/40 79/79 [==============================] - 5s 60ms/step - loss: 0.0200 - accuracy: 0.9944 Epoch 37/40 79/79 [==============================] - 5s 58ms/step - loss: 0.0278 - accuracy: 0.9960 Epoch 38/40 79/79 [==============================] - 4s 55ms/step - loss: 0.0217 - accuracy: 0.9948 Epoch 39/40 79/79 [==============================] - 4s 53ms/step - loss: 0.0618 - accuracy: 0.9952 Epoch 40/40 79/79 [==============================] - 4s 54ms/step - loss: 0.0118 - accuracy: 0.9980
<keras.callbacks.History at 0x7a36e020fb80>
# Evaluate the trained CNN on the held-out test set; returns [loss, accuracy].
model.evaluate(test_dataset_shuffled)
12/12 [==============================] - 1s 104ms/step - loss: 1.2679 - accuracy: 0.9462
[1.2678923606872559, 0.9462365508079529]
BATCH_SIZE = 32
# Number of whole batches per epoch; floor division drops the final partial
# batch (2520 // 32 = 78 train steps, 372 // 32 = 11 validation steps).
steps_per_epoch = NUM_TRAIN_EXAMPLES // BATCH_SIZE
validation_steps = NUM_TEST_EXAMPLES // BATCH_SIZE
print('steps_per_epoch:', steps_per_epoch)
print('validation_steps:', validation_steps)
steps_per_epoch: 78 validation_steps: 11
# Remove artifacts from previous runs so the callbacks start clean.
!rm -rf tmp/checkpoints
!rm -rf logs
import os
import datetime
# Preparing callbacks.
os.makedirs('logs/fit', exist_ok=True)
# One TensorBoard log directory per run, keyed by a timestamp.
tensorboard_log_dir = 'logs/fit/' + datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
tensorboard_callback = tf.keras.callbacks.TensorBoard(
    log_dir=tensorboard_log_dir,
    histogram_freq=1
)
os.makedirs('tmp/checkpoints', exist_ok=True)
# Saves weights after each epoch; the filename encodes epoch and val_loss.
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath='tmp/checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5'
)
# Stops training when val_accuracy has not improved for 5 consecutive epochs.
# (Both of these callbacks are defined but commented out in the fit() call.)
early_stopping_callback = tf.keras.callbacks.EarlyStopping(
    patience=5,
    monitor='val_accuracy'
    # monitor='val_loss'
)
# Train with explicit step counts. .repeat() lets the finite datasets be
# iterated for as many steps as steps_per_epoch/validation_steps request.
training_history = model.fit(
    x= train_dataset_augmented_shuffled.repeat(),
    validation_data=test_dataset_shuffled.repeat(),
    epochs=40,
    steps_per_epoch=steps_per_epoch,
    validation_steps=validation_steps,
    callbacks=[
        # model_checkpoint_callback,
        # early_stopping_callback,
        tensorboard_callback
    ],
    verbose=1
)
Epoch 1/40 78/78 [==============================] - 6s 73ms/step - loss: 0.0322 - accuracy: 0.9948 - val_loss: 0.6279 - val_accuracy: 0.9545 Epoch 2/40 78/78 [==============================] - 6s 81ms/step - loss: 0.0173 - accuracy: 0.9972 - val_loss: 0.9980 - val_accuracy: 0.9489 Epoch 3/40 78/78 [==============================] - 6s 75ms/step - loss: 0.0014 - accuracy: 1.0000 - val_loss: 1.0060 - val_accuracy: 0.9517 Epoch 4/40 78/78 [==============================] - 6s 74ms/step - loss: 0.0210 - accuracy: 0.9972 - val_loss: 1.1027 - val_accuracy: 0.9545 Epoch 5/40 78/78 [==============================] - 6s 75ms/step - loss: 0.0214 - accuracy: 0.9948 - val_loss: 1.1194 - val_accuracy: 0.9517 Epoch 6/40 78/78 [==============================] - 5s 65ms/step - loss: 0.0261 - accuracy: 0.9956 - val_loss: 0.9814 - val_accuracy: 0.9318 Epoch 7/40 78/78 [==============================] - 5s 67ms/step - loss: 0.0193 - accuracy: 0.9964 - val_loss: 2.0795 - val_accuracy: 0.8636 Epoch 8/40 78/78 [==============================] - 6s 78ms/step - loss: 0.0195 - accuracy: 0.9964 - val_loss: 0.6560 - val_accuracy: 0.9403 Epoch 9/40 78/78 [==============================] - 5s 64ms/step - loss: 0.0073 - accuracy: 0.9980 - val_loss: 0.8258 - val_accuracy: 0.8920 Epoch 10/40 78/78 [==============================] - 5s 66ms/step - loss: 0.0136 - accuracy: 0.9988 - val_loss: 0.9657 - val_accuracy: 0.9205 Epoch 11/40 78/78 [==============================] - 5s 66ms/step - loss: 0.0166 - accuracy: 0.9968 - val_loss: 0.9379 - val_accuracy: 0.9347 Epoch 12/40 78/78 [==============================] - 6s 82ms/step - loss: 0.0215 - accuracy: 0.9980 - val_loss: 0.9865 - val_accuracy: 0.9091 Epoch 13/40 78/78 [==============================] - 7s 88ms/step - loss: 0.0190 - accuracy: 0.9980 - val_loss: 0.6597 - val_accuracy: 0.9489 Epoch 14/40 78/78 [==============================] - 6s 72ms/step - loss: 0.0049 - accuracy: 0.9992 - val_loss: 1.5706 - val_accuracy: 0.9205 Epoch 15/40 78/78 
[==============================] - 5s 64ms/step - loss: 0.0302 - accuracy: 0.9956 - val_loss: 0.5279 - val_accuracy: 0.9659 Epoch 16/40 78/78 [==============================] - 5s 70ms/step - loss: 0.0061 - accuracy: 0.9984 - val_loss: 0.8885 - val_accuracy: 0.8750 Epoch 17/40 78/78 [==============================] - 5s 63ms/step - loss: 0.0132 - accuracy: 0.9976 - val_loss: 0.5495 - val_accuracy: 0.9290 Epoch 18/40 78/78 [==============================] - 5s 69ms/step - loss: 0.0044 - accuracy: 0.9996 - val_loss: 1.3008 - val_accuracy: 0.8750 Epoch 19/40 78/78 [==============================] - 6s 79ms/step - loss: 0.0033 - accuracy: 0.9988 - val_loss: 3.3813 - val_accuracy: 0.8182 Epoch 20/40 78/78 [==============================] - 5s 66ms/step - loss: 0.0167 - accuracy: 0.9976 - val_loss: 1.0709 - val_accuracy: 0.9091 Epoch 21/40 78/78 [==============================] - 5s 63ms/step - loss: 0.0062 - accuracy: 0.9980 - val_loss: 1.1053 - val_accuracy: 0.9489 Epoch 22/40 78/78 [==============================] - 5s 68ms/step - loss: 0.0311 - accuracy: 0.9972 - val_loss: 0.8387 - val_accuracy: 0.9290 Epoch 23/40 78/78 [==============================] - 5s 67ms/step - loss: 0.0049 - accuracy: 0.9992 - val_loss: 0.6118 - val_accuracy: 0.9347 Epoch 24/40 78/78 [==============================] - 5s 67ms/step - loss: 0.0098 - accuracy: 0.9976 - val_loss: 0.6217 - val_accuracy: 0.9432 Epoch 25/40 78/78 [==============================] - 6s 77ms/step - loss: 0.0025 - accuracy: 0.9996 - val_loss: 0.8800 - val_accuracy: 0.9460 Epoch 26/40 78/78 [==============================] - 5s 60ms/step - loss: 0.0149 - accuracy: 0.9976 - val_loss: 1.4190 - val_accuracy: 0.8949 Epoch 27/40 78/78 [==============================] - 5s 66ms/step - loss: 0.0173 - accuracy: 0.9976 - val_loss: 2.3274 - val_accuracy: 0.8636 Epoch 28/40 78/78 [==============================] - 6s 72ms/step - loss: 0.0070 - accuracy: 0.9992 - val_loss: 0.8087 - val_accuracy: 0.9290 Epoch 29/40 78/78 
[==============================] - 5s 67ms/step - loss: 0.0118 - accuracy: 0.9984 - val_loss: 1.3302 - val_accuracy: 0.8864 Epoch 30/40 78/78 [==============================] - 5s 66ms/step - loss: 0.0086 - accuracy: 0.9984 - val_loss: 0.9249 - val_accuracy: 0.9460 Epoch 31/40 78/78 [==============================] - 6s 80ms/step - loss: 0.0021 - accuracy: 0.9996 - val_loss: 0.8826 - val_accuracy: 0.9347 Epoch 32/40 78/78 [==============================] - 5s 61ms/step - loss: 0.0098 - accuracy: 0.9976 - val_loss: 0.7692 - val_accuracy: 0.9574 Epoch 33/40 78/78 [==============================] - 5s 68ms/step - loss: 1.1891e-04 - accuracy: 1.0000 - val_loss: 0.8732 - val_accuracy: 0.9574 Epoch 34/40 78/78 [==============================] - 6s 71ms/step - loss: 0.0111 - accuracy: 0.9984 - val_loss: 0.6035 - val_accuracy: 0.9517 Epoch 35/40 78/78 [==============================] - 5s 61ms/step - loss: 0.0048 - accuracy: 0.9996 - val_loss: 0.6229 - val_accuracy: 0.9403 Epoch 36/40 78/78 [==============================] - 5s 69ms/step - loss: 0.0089 - accuracy: 0.9964 - val_loss: 0.9306 - val_accuracy: 0.9205 Epoch 37/40 78/78 [==============================] - 6s 81ms/step - loss: 6.4737e-05 - accuracy: 1.0000 - val_loss: 1.4043 - val_accuracy: 0.9034 Epoch 38/40 78/78 [==============================] - 5s 68ms/step - loss: 0.0064 - accuracy: 0.9988 - val_loss: 0.7957 - val_accuracy: 0.9318 Epoch 39/40 78/78 [==============================] - 5s 68ms/step - loss: 0.0042 - accuracy: 0.9996 - val_loss: 0.8129 - val_accuracy: 0.9290 Epoch 40/40 78/78 [==============================] - 5s 64ms/step - loss: 1.7076e-04 - accuracy: 1.0000 - val_loss: 0.7205 - val_accuracy: 0.9460
def render_training_history(training_history):
    """Plot training/validation loss and accuracy curves side by side.

    Args:
        training_history: The History object returned by model.fit().
    """
    history = training_history.history

    plt.figure(figsize=(14, 4))

    # Left panel: loss curves.
    plt.subplot(1, 2, 1)
    plt.title('Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.plot(history['loss'], label='Training set')
    plt.plot(history['val_loss'], label='Test set', linestyle='--')
    plt.legend()
    plt.grid(linestyle='--', linewidth=1, alpha=0.5)

    # Right panel: accuracy curves.
    plt.subplot(1, 2, 2)
    plt.title('Accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.plot(history['accuracy'], label='Training set')
    plt.plot(history['val_accuracy'], label='Test set', linestyle='--')
    plt.legend()
    plt.grid(linestyle='--', linewidth=1, alpha=0.5)

    plt.show()
import plotly.express as px
import matplotlib.pyplot as plt

# Accuracy values recorded by model.fit for each completed epoch.
accuracy = training_history.history['accuracy']
val_accuracy = training_history.history['val_accuracy']
# FIX: derive the epoch axis from the recorded history instead of a
# hard-coded list(range(40)), so the plot stays correct if the epoch count
# changes or training stops early (e.g. via the EarlyStopping callback).
epoch = list(range(len(accuracy)))
# Create the scatter plot
fig = px.scatter()
# Add the training accuracy trace
fig.add_scatter(x=epoch, y=accuracy, name='Training set')
# Add the validation accuracy trace
fig.add_scatter(x=epoch, y=val_accuracy, name='Validation set')
fig.update_layout(
    title='Accuracy vs Epoch',
    xaxis_title='Epoch',
    yaxis_title='Accuracy',
    yaxis_range=[0, 1.0]  # Set the range of the y-axis for accuracy
)
fig.show()
import plotly.express as px

# Loss values recorded by model.fit for each completed epoch.
loss = training_history.history['loss']
val_loss = training_history.history['val_loss']
# FIX: derive the epoch axis from the recorded history instead of a
# hard-coded list(range(40)), so the plot stays correct if the epoch count
# changes or training stops early.
epoch = list(range(len(loss)))
# Create the scatter plot
fig = px.scatter()
# Add the training loss trace
fig.add_scatter(x=epoch, y=loss, name='Training Loss')
# Add the validation loss trace
fig.add_scatter(x=epoch, y=val_loss, name='Validation Loss')
fig.update_layout(
    title='Loss vs Epoch',
    xaxis_title='Epoch',
    yaxis_title='Loss',
    yaxis=dict(range=[0, 4])  # Set the range of the y-axis for loss
)
fig.show()
# save cnn model
# Persist the trained model in HDF5 format for later inference / the game UI.
model_name = 'rock_paper_scissors_cnn_updated.h5'
model.save(model_name, save_format='h5')
# Ensure plotly is installed (already satisfied in this environment).
! pip install plotly
Requirement already satisfied: plotly in /opt/conda/lib/python3.10/site-packages (5.14.1) Requirement already satisfied: tenacity>=6.2.0 in /opt/conda/lib/python3.10/site-packages (from plotly) (8.2.2) Requirement already satisfied: packaging in /opt/conda/lib/python3.10/site-packages (from plotly) (21.3) Requirement already satisfied: pyparsing!=3.0.5,>=2.0.2 in /opt/conda/lib/python3.10/site-packages (from packaging->plotly) (3.0.9) WARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv
import matplotlib.pyplot as plt
# Switch matplotlib to the interactive TkAgg backend (for local windowed display).
plt.switch_backend('tkagg')
$$ \text{accuracy} = \frac{\text{correct predictions}}{\text{total predictions}} $$